bitkeeper revision 1.1366 (4268c126o36cKcnzrSkVxkbrPsoz1g)
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 22 Apr 2005 09:17:26 +0000 (09:17 +0000)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 22 Apr 2005 09:17:26 +0000 (09:17 +0000)
Clean up shadow destruction and fix domain destroy when shadow mode
is disabled.
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/domain.c
xen/arch/x86/shadow.c
xen/arch/x86/vmx_vmcs.c
xen/common/page_alloc.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/shadow.h
xen/include/xen/shadow.h

index 75b165b3c0441e2c4ac85745d2b97c78c8a10f51..1b4f0ca445df2ac6012070d3568a1f50f265f872 100644 (file)
@@ -991,36 +991,24 @@ void domain_relinquish_resources(struct domain *d)
     {
         if ( pagetable_val(ed->arch.guest_table) != 0 )
         {
-            struct pfn_info *page =
-                &frame_table[pagetable_val(ed->arch.guest_table)>>PAGE_SHIFT];
-
-            if ( shadow_mode_enabled(d) )
-                put_page(page);
-            else
-                put_page_and_type(page);
-
+            (shadow_mode_enabled(d) ? put_page : put_page_and_type)
+                (&frame_table[pagetable_val(
+                    ed->arch.guest_table) >> PAGE_SHIFT]);
             ed->arch.guest_table = mk_pagetable(0);
         }
 
         if ( pagetable_val(ed->arch.guest_table_user) != 0 )
         {
-            struct pfn_info *page =
-                &frame_table[pagetable_val(ed->arch.guest_table_user)
-                             >> PAGE_SHIFT];
-
-            if ( shadow_mode_enabled(d) )
-                put_page(page);
-            else
-                put_page_and_type(page);
-
+            (shadow_mode_enabled(d) ? put_page : put_page_and_type)
+                (&frame_table[pagetable_val(
+                    ed->arch.guest_table_user) >> PAGE_SHIFT]);
             ed->arch.guest_table_user = mk_pagetable(0);
         }
 
         vmx_relinquish_resources(ed);
     }
 
-    /* Exit shadow mode before deconstructing final guest page table. */
-    shadow_mode_destroy(d);
+    shadow_mode_disable(d);
 
     /*
      * Relinquish GDT mappings. No need for explicit unmapping of the LDT as 
index a99902062aebc0a9f7e4ff62c017904a2e5189c3..80b4c0954888b20753617ba7c3d746b3c09cedbf 100644 (file)
@@ -1111,9 +1111,17 @@ static void free_out_of_sync_entries(struct domain *d)
             d->arch.out_of_sync_extras_count);
 }
 
-void shadow_mode_destroy(struct domain *d)
+void __shadow_mode_disable(struct domain *d)
 {
-    shadow_lock(d);
+    if ( unlikely(!shadow_mode_enabled(d)) )
+        return;
+
+    /*
+     * Currently this does not fix up page ref counts, so it is valid to call
+     * only when a domain is being destroyed.
+     */
+    BUG_ON(!test_bit(DF_DYING, &d->d_flags));
+    d->arch.shadow_tainted_refcnts = 1;
 
     free_shadow_pages(d);
     free_writable_pte_predictions(d);
@@ -1135,26 +1143,6 @@ void shadow_mode_destroy(struct domain *d)
 
     free_shadow_ht_entries(d);
     free_out_of_sync_entries(d);
-
-    shadow_unlock(d);
-}    
-
-void __shadow_mode_disable(struct domain *d)
-{
-    // This needs rethinking for the full shadow mode stuff.
-    //
-    // Among other things, ref counts need to be restored to a sensible
-    // state for a non-shadow-mode guest...
-    // This is probably easiest to do by stealing code from audit_domain().
-    //
-    BUG();
-
-    free_shadow_pages(d);
-    
-    d->arch.shadow_mode = 0;
-
-    free_shadow_ht_entries(d);
-    free_out_of_sync_entries(d);
 }
 
 static int shadow_mode_table_op(
@@ -1293,7 +1281,7 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
     switch ( op )
     {
     case DOM0_SHADOW_CONTROL_OP_OFF:
-        shadow_mode_disable(d);
+        __shadow_mode_disable(d);
         break;
 
     case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
@@ -1303,12 +1291,14 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
 
     case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
         free_shadow_pages(d);
-        rc = __shadow_mode_enable(d, d->arch.shadow_mode|SHM_enable|SHM_log_dirty);
+        rc = __shadow_mode_enable(
+            d, d->arch.shadow_mode|SHM_enable|SHM_log_dirty);
         break;
 
     case DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE:
         free_shadow_pages(d);
-        rc = __shadow_mode_enable(d, d->arch.shadow_mode|SHM_enable|SHM_translate);
+        rc = __shadow_mode_enable(
+            d, d->arch.shadow_mode|SHM_enable|SHM_translate);
         break;
 
     default:
@@ -2166,6 +2156,9 @@ u32 shadow_remove_all_access(struct domain *d, unsigned long forbidden_gmfn)
     struct shadow_status *a;
     u32 count = 0;
 
+    if ( unlikely(!shadow_mode_enabled(d)) )
+        return 0;
+
     ASSERT(spin_is_locked(&d->arch.shadow_lock));
     perfc_incrc(remove_all_access);
 
index 512846c4b7b5d101769a0fff2e33b53022afa345..a54db49cdfe460578d7b768e25b2de4f87479b9d 100644 (file)
@@ -160,27 +160,11 @@ void vmx_do_launch(struct exec_domain *ed)
     unsigned int tr, cpu, error = 0;
     struct host_execution_env host_env;
     struct Xgt_desc_struct desc;
-    struct list_head *list_ent;
-    unsigned long i, pfn = 0;
+    unsigned long pfn = 0;
     struct pfn_info *page;
     execution_context_t *ec = get_execution_context();
-    struct domain *d = ed->domain;
 
-    cpu =  smp_processor_id();
-    d->arch.min_pfn = d->arch.max_pfn = 0;
-
-    spin_lock(&d->page_alloc_lock);
-    list_ent = d->page_list.next;
-
-    for ( i = 0; list_ent != &d->page_list; i++ )
-    {
-        pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;
-        d->arch.min_pfn = min(d->arch.min_pfn, pfn);
-        d->arch.max_pfn = max(d->arch.max_pfn, pfn);
-        list_ent = frame_table[pfn].list.next;
-    }
-
-    spin_unlock(&d->page_alloc_lock);
+    cpu = smp_processor_id();
 
     page = (struct pfn_info *) alloc_domheap_page(NULL);
     pfn = (unsigned long) (page - frame_table);
index b4d66ca71b42ef62db8a625a541d7c75aa9939e5..7b6f0b13aecbb2a6cc8aba0ce34dbd35ece3a333 100644 (file)
@@ -562,6 +562,8 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order)
         for ( i = 0; i < (1 << order); i++ )
         {
             shadow_drop_references(d, &pg[i]);
+            ASSERT(((pg[i].u.inuse.type_info & PGT_count_mask) == 0) ||
+                   shadow_tainted_refcnts(d));
             pg[i].tlbflush_timestamp  = tlbflush_current_time();
             pg[i].u.free.cpu_mask     = d->cpuset;
             list_del(&pg[i].list);
index d63588c44791f7a7b012a58906a75efefa32e5d3..1d3872a48ebeaf6047850f2493c032b75b7cda6e 100644 (file)
@@ -26,11 +26,11 @@ struct arch_domain
     /* I/O-port access bitmap mask. */
     u8 *iobmp_mask;       /* Address of IO bitmap mask, or NULL.      */
 
-    /* shadow mode status and controls */
+    /* Shadow mode status and controls. */
     unsigned int shadow_mode;  /* flags to control shadow table operation */
     spinlock_t   shadow_lock;
-    unsigned long min_pfn;     /* min host physical */
-    unsigned long max_pfn;     /* max host physical */
+    /* Shadow mode has tainted page reference counts? */
+    unsigned int shadow_tainted_refcnts;
 
     /* shadow hashtable */
     struct shadow_status *shadow_ht;
index 4d954a2f2248806434cf2327a53f43d3e2b05d94..c17599afd2e5f6ec43f2e5922b22fccba7be7db1 100644 (file)
@@ -42,6 +42,8 @@
 #define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
 #define shadow_mode_external(_d)  ((_d)->arch.shadow_mode & SHM_external)
 
+#define shadow_tainted_refcnts(_d) ((_d)->arch.shadow_tainted_refcnts)
+
 #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
 #define __shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
      (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
@@ -173,12 +175,14 @@ shadow_sync_va(struct exec_domain *ed, unsigned long gva)
 extern void __shadow_mode_disable(struct domain *d);
 static inline void shadow_mode_disable(struct domain *d)
 {
-    if ( shadow_mode_enabled(d) )
+    if ( unlikely(shadow_mode_enabled(d)) )
+    {
+        shadow_lock(d);
         __shadow_mode_disable(d);
+        shadow_unlock(d);
+    }
 }
 
-extern void shadow_mode_destroy(struct domain *d);
-
 /************************************************************************/
 
 #define __mfn_to_gpfn(_d, mfn)                         \
index a69bd59802da627cadb06f84508bfe188c5089cb..4248b30f80c762be54d9cacb2d3d5f6c4f51553d 100644 (file)
@@ -12,6 +12,7 @@
 
 #define shadow_drop_references(_d, _p)          ((void)0)
 #define shadow_sync_and_drop_references(_d, _p) ((void)0)
+#define shadow_tainted_refcnts(_d)              (0)
 
 #endif